Introduction

Build Website

Packages & Data

Packages

This document was prepared on 2021-11-11.

library(tidyverse)
library(patchwork)
library(glmmTMB)
library(report)
library(parameters)
library(correlation)
library(modelbased)
library(performance)
library(see)

# Summarize session information (R version, platform, attached packages)
report::report(sessionInfo()) |> 
  summary()

The analysis was done using the R Statistical language (v4.1.1; R Core Team, 2021) on Windows 10 x64, using the packages effectsize (v0.4.5.4), ggplot2 (v3.3.5), stringr (v1.4.0), forcats (v0.5.1), tidyr (v1.1.4), readr (v2.0.2), dplyr (v1.0.7), tibble (v3.1.4), purrr (v0.3.4), parameters (v0.15.0.1), insight (v0.14.5.1), performance (v0.7.3.5), see (v0.7.0.1), easystats (v0.4.3), correlation (v0.7.1.1), modelbased (v0.7.0.1), bayestestR (v0.11.5), report (v0.4.0), datawizard (v0.2.1.9000), glmmTMB (v1.1.2.3), patchwork (v1.1.1) and tidyverse (v1.3.1).

Data

# setwd("C:/Users/user/Desktop/Sputnik/2019-23/DeceptionInteroTom")

# Load the combined dataset, convert design variables to factors,
# and standardize column names (CamelCase with instrument prefixes).
df <- read.csv("data/data_combined.csv") |>
  mutate(
    ID = as.factor(paste0("S", ID)),
    condition = as.factor(condition),
    item = as.factor(item),
    style = as.factor(style),
    instruction = as.factor(instruction)
  ) |>
  # TODO: This renaming should be done at the preprocessing stage
  rename(
    "Participant" = "ID",
    "Condition" = "condition",
    "Item" = "item",
    "Phrasing" = "style",
    "Answer" = "instruction",
    "YONI_Total" = "yoni_total",
    "YONI_Affective" = "yoni_affective",
    "YONI_Cognitive" = "yoni_cognitive",
    "YONI_Physical" = "yoni_physical",
    "BES_Total" = "BES_total",
    "BES_Cognitive" = "BES_cognitive",
    "BES_Affective" = "BES_affective",
    "HCT_Confidence" = "HCT_confidence",
    "HCT_Accuracy" = "HCT_accuracy",
    "HCT_Awareness" = "HCT_awareness",
    "MAIA_Total" = "MAIA_total",
    "MAIA_AttentionRegulation" = "MAIA_attention_regulation",
    "MAIA_BodyListening" = "MAIA_body_listening",
    "MAIA_EmotionalAwareness" = "MAIA_emotional_awareness",
    "MAIA_NotDistracting" = "MAIA_not_distracting",
    "MAIA_NotWorrying" = "MAIA_not_worrying",
    "MAIA_Noticing" = "MAIA_noticing",
    "MAIA_SelfRegulation" = "MAIA_self_regulation",
    "MAIA_Trusting" = "MAIA_trusting",
    "LIE_Ability" = "lie_ability",
    "LIE_Frequency" = "lie_frequency",
    "LIE_Negativity" = "lie_negativity",
    "LIE_Contextuality" = "lie_contextuality",
    "Confidence" = "DT_confidence",
    "RT" = "DT_RT"
  ) |>
  # Human-readable answer labels
  mutate(Answer = fct_recode(Answer, Lie = "LIE", Truth = "TRUTH")) |>
  # Drop unused HCT sub-conditions
  select(-HCT_guess, -HCT_noguess, -HCT_onebreath)

# Describe the sample (N, age, gender) in text form
sample_description <- report::report_participants(
  df,
  participants = "Participant",
  sex = "Gender",
  age = "Age"
)
cat(paste("The data consists of", sample_description))

The data consists of 30 participants (Mean age = 21.1, SD = 2.1, range: [18, 25]; 63.3% females)

Measures

Theory of Mind / Empathy

Yoni Task

# Distribution of participant-level Yoni task scores.
# Uses across() (summarise_all is superseded) and a single pipe style.
df |>
  group_by(Participant) |>
  select(starts_with("YONI_")) |>
  # Participant-level mean of each YONI score (NAs ignored)
  summarise(across(everything(), \(x) mean(x, na.rm = TRUE))) |>
  tidyr::pivot_longer(-Participant, values_to = "Scores") |>
  # "YONI_Total" -> "YONI (Total)"
  mutate(name = paste0(str_replace(name, "_", " ("), ")")) |>
  ggplot(aes(x = Scores, fill = name)) +
  geom_density() +
  scale_fill_manual(
    values = c("YONI (Affective)" = "Purple",
               "YONI (Cognitive)" = "Blue",
               "YONI (Physical)" = "Green",
               "YONI (Total)" = "DarkBlue"),
    guide = "none"
  ) +
  facet_wrap(~name, scales = "free")

BES Questionnaire

# Distribution of participant-level BES (empathy questionnaire) scores
df |>
  group_by(Participant) |>
  select(starts_with("BES_")) |>
  summarise_all(mean, na.rm = TRUE) |>
  tidyr::pivot_longer(-Participant, values_to = "Scores") |>
  # "BES_Total" -> "BES (Total)"
  mutate(name = paste0(str_replace(name, "_", " ("), ")")) |>
  ggplot(aes(x = Scores, fill = name)) +
  geom_density() +
  scale_fill_manual(
    values = c("BES (Affective)" = "Purple",
               "BES (Cognitive)" = "Blue",
               "BES (Total)" = "DarkBlue"),
    guide = "none"
  ) +
  facet_wrap(~name, scales = "free")

Interoception

Heartbeat Counting Task (HCT)

# Distribution of participant-level Heartbeat Counting Task scores
df |>
  group_by(Participant) |>
  select(starts_with("HCT_")) |>
  summarise_all(mean, na.rm = TRUE) |>
  tidyr::pivot_longer(-Participant, values_to = "Scores") |>
  # "HCT_Accuracy" -> "HCT (Accuracy)"
  mutate(name = paste0(str_replace(name, "_", " ("), ")")) |>
  ggplot(aes(x = Scores, fill = name)) +
  geom_density() +
  scale_fill_manual(
    values = c("HCT (Accuracy)" = "Red",
               "HCT (Awareness)" = "Orange",
               "HCT (Confidence)" = "DarkOrange"),
    guide = "none"
  ) +
  facet_wrap(~name, scales = "free")
> Warning: Removed 6 rows containing non-finite values (stat_density).

MAIA

# Distribution of participant-level MAIA (interoceptive sensibility) scores
df |>
  group_by(Participant) |>
  select(starts_with("MAIA_")) |>
  summarise_all(mean, na.rm = TRUE) |>
  tidyr::pivot_longer(-Participant, values_to = "Scores") |>
  # "MAIA_Total" -> "MAIA (Total)"
  mutate(name = paste0(str_replace(name, "_", " ("), ")")) |>
  ggplot(aes(x = Scores, fill = name)) +
  geom_density() +
  scale_fill_brewer(palette = "Reds", guide = "none") +
  facet_wrap(~name, scales = "free")

Deception

LIE Scale

# Distribution of participant-level LIE scale scores
df |>
  group_by(Participant) |>
  select(starts_with("LIE_")) |>
  summarise_all(mean, na.rm = TRUE) |>
  tidyr::pivot_longer(-Participant, values_to = "Scores") |>
  # "LIE_Ability" -> "LIE (Ability)"
  mutate(name = paste0(str_replace(name, "_", " ("), ")")) |>
  ggplot(aes(x = Scores, fill = name)) +
  geom_density() +
  scale_fill_manual(
    values = c("LIE (Ability)" = "#2196F3",
               "LIE (Frequency)" = "#4CAF50",
               "LIE (Contextuality)" = "#FF9800",
               "LIE (Negativity)" = "#E91E63"),
    guide = "none"
  ) +
  facet_wrap(~name, scales = "free")

Deception Task

Summary Table

# Per-participant summary of Confidence and RT (mean +- SD), split by
# answer type (Lie vs. Truth), rendered as a table.
df |>
  group_by(Participant, Answer) |>
  summarise(
    Confidence = paste(insight::format_value(mean(Confidence, na.rm = TRUE)),
                       " +- ",
                       insight::format_value(sd(Confidence, na.rm = TRUE))),
    RT = paste(insight::format_value(mean(RT, na.rm = TRUE)),
               " +- ",
               insight::format_value(sd(RT, na.rm = TRUE)))
  ) |>
  arrange(Participant) |>
  knitr::kable()
> `summarise()` has grouped output by 'Participant'. You can override using the `.groups` argument.
Participant Answer Confidence RT
S1 Lie 0.40 +- 0.10 3.69 +- 0.69
S1 Truth 0.52 +- 0.15 3.75 +- 0.79
S10 Lie 0.59 +- 0.28 3.20 +- 0.71
S10 Truth 0.84 +- 0.13 3.22 +- 0.62
S11 Lie 0.38 +- 0.31 3.60 +- 0.69
S11 Truth 0.72 +- 0.26 3.48 +- 0.62
S12 Lie 0.58 +- 0.13 3.77 +- 1.42
S12 Truth 0.64 +- 0.16 4.40 +- 1.72
S13 Lie 0.28 +- 0.25 7.22 +- 0.82
S13 Truth 0.85 +- 0.17 7.57 +- 0.81
S14 Lie 0.52 +- 0.29 4.13 +- 0.86
S14 Truth 0.62 +- 0.26 5.29 +- 1.54
S15 Lie +- 4.38 +- 0.94
S15 Truth +- 4.15 +- 1.04
S16 Lie 0.41 +- 0.18 4.94 +- 1.13
S16 Truth 0.66 +- 0.11 5.07 +- 1.01
S17 Lie 0.63 +- 0.31 2.72 +- 0.63
S17 Truth 0.77 +- 0.19 2.72 +- 0.51
S18 Lie 0.21 +- 0.33 5.88 +- 2.54
S18 Truth 0.79 +- 0.36 5.13 +- 1.98
S19 Lie +- 3.44 +- 1.55
S19 Truth +- 3.71 +- 2.05
S2 Lie 0.10 +- 0.17 6.46 +- 1.95
S2 Truth 0.94 +- 0.08 6.83 +- 1.71
S20 Lie 0.37 +- 0.32 3.79 +- 0.74
S20 Truth 0.80 +- 0.17 4.04 +- 1.02
S21 Lie 0.55 +- 0.12 4.97 +- 1.01
S21 Truth 0.74 +- 0.18 5.09 +- 1.04
S22 Lie 0.13 +- 0.29 4.78 +- 2.94
S22 Truth 0.81 +- 0.36 5.12 +- 2.99
S23 Lie +- 3.10 +- 1.18
S23 Truth +- 2.99 +- 0.80
S24 Lie 0.27 +- 0.22 2.81 +- 0.68
S24 Truth 0.72 +- 0.17 2.69 +- 0.64
S25 Lie 0.63 +- 0.33 3.71 +- 1.11
S25 Truth 0.85 +- 0.22 3.71 +- 0.92
S26 Lie 0.46 +- 0.21 3.32 +- 0.61
S26 Truth 0.70 +- 0.16 3.23 +- 0.53
S27 Lie 0.32 +- 0.09 5.19 +- 1.88
S27 Truth 0.68 +- 0.07 4.75 +- 1.78
S28 Lie 0.49 +- 0.36 3.78 +- 0.83
S28 Truth 0.59 +- 0.32 3.69 +- 0.50
S29 Lie 0.58 +- 0.50 3.22 +- 0.56
S29 Truth 0.90 +- 0.30 3.31 +- 0.68
S3 Lie +- 3.35 +- 0.80
S3 Truth +- 3.42 +- 0.96
S30 Lie 0.80 +- 0.28 4.09 +- 1.10
S30 Truth 0.88 +- 0.18 3.80 +- 0.99
S4 Lie 0.67 +- 0.18 3.86 +- 0.89
S4 Truth 0.77 +- 0.13 3.81 +- 0.80
S5 Lie 0.53 +- 0.23 3.41 +- 0.63
S5 Truth 0.72 +- 0.21 3.45 +- 0.69
S6 Lie 0.11 +- 0.15 3.10 +- 0.52
S6 Truth 0.90 +- 0.09 3.24 +- 0.64
S7 Lie 0.62 +- 0.33 3.84 +- 0.58
S7 Truth 0.69 +- 0.29 3.69 +- 0.55
S8 Lie 0.44 +- 0.17 4.48 +- 0.95
S8 Truth 0.71 +- 0.16 4.59 +- 1.12
S9 Lie 5.79e-04 +- 9.06e-04 4.75 +- 1.42
S9 Truth 1.00 +- 1.18e-03 5.15 +- 1.88

Outliers

# Exclude outlier participants:
# - S9: extreme answers
# - S3, S15, S19, S23: no (confidence) data
df <- df |>
  dplyr::filter(!Participant %in% c("S9", "S3", "S15", "S19", "S23"))

Distributions

# Per-participant densities of Confidence (top) and RT (bottom).
# S29 is excluded from these plots only (kept in the models) —
# NOTE(review): reason not documented in this file; confirm with authors.
p1 <- df |>
  dplyr::filter(!Participant %in% c("S29")) |>
  ggplot(aes(x = Confidence, fill = Participant)) +
  geom_density(alpha = 0.1) +
  see::scale_fill_material_d(palette = "rainbow", guide = "none") +
  see::theme_modern() +
  scale_x_continuous(labels = scales::percent, expand = expansion(c(0, .05))) +
  scale_y_continuous(expand = expansion(c(0, .05))) +
  facet_wrap(~Answer)

p2 <- df |>
  dplyr::filter(!Participant %in% c("S29")) |>
  ggplot(aes(x = RT, fill = Participant)) +
  geom_density(alpha = 0.1) +
  see::scale_fill_material_d(palette = "rainbow", guide = "none") +
  see::theme_modern() +  # was missing: keep theming consistent with p1
  scale_x_continuous(expand = expansion(c(0, .05))) +
  scale_y_continuous(expand = expansion(c(0, .05))) +
  facet_wrap(~Answer)

# Stack the two panels (patchwork)
p1 / p2

Manipulation Checks

Correlation

# Participant-level means of all questionnaire/task scores, used for the
# correlation analyses below.
dfsub <- df |>
  select(Participant,
         starts_with("YONI_"),
         starts_with("BES_"),
         starts_with("HCT_"),
         starts_with("MAIA_"),
         starts_with("LIE_")) |>
  group_by(Participant) |>
  # na.rm = TRUE: HCT scores contain missing values (see the warning in the
  # HCT density plot); without it the affected participants' means are NA.
  # Consistent with every other summary in this document.
  summarise(across(everything(), \(x) mean(x, na.rm = TRUE)))

Theory of Mind / Empathy

# Convergence between the two ToM/empathy measures (Yoni vs. BES)
r <- correlation(
  select(dfsub, starts_with("YONI_")),
  select(dfsub, starts_with("BES_")),
  p_adjust = "none"
)

plot(summary(r))

Interoception

# Convergence between the two interoception measures (MAIA vs. HCT)
r <- correlation(
  select(dfsub, starts_with("MAIA_")),
  select(dfsub, starts_with("HCT_")),
  p_adjust = "none"
)

plot(summary(r))

ToM and Interoception

# Cross-domain correlations: interoception (MAIA, HCT) vs. ToM/empathy (Yoni, BES)
r <- correlation(
  select(dfsub, starts_with(c("MAIA_", "HCT_"))),
  select(dfsub, starts_with(c("YONI_", "BES_"))),
  p_adjust = "none"
)

plot(summary(r))

Phrasing

  • Main effect of phrasing on RT only. Indirect questions lead to slower answers.

RT

# Mixed model: RT as a function of Answer x Phrasing,
# with random intercepts per participant and item
model <- glmmTMB(
  RT ~ Answer * Phrasing + (1 | Participant) + (1 | Item),
  data = df
)

parameters::parameters(model, effects = "fixed")
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 4.01 0.24 0.95 3.54 4.47 16.80 Inf 0.00 fixed
AnswerTruth 0.03 0.07 0.95 -0.11 0.17 0.39 Inf 0.69 fixed
PhrasingIndirect 0.33 0.07 0.95 0.19 0.48 4.58 Inf 0.00 fixed
AnswerTruth:PhrasingIndirect 0.08 0.10 0.95 -0.12 0.28 0.79 Inf 0.43 fixed
# Estimated marginal means for each Answer x Phrasing cell
plot(estimate_means(model, at = c("Answer", "Phrasing")), show_data = "none")

Confidence

# Mixed model: Confidence as a function of Answer x Phrasing
model <- glmmTMB(
  Confidence ~ Answer * Phrasing + (1 | Participant) + (1 | Item),
  data = df
)

parameters::parameters(model)
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects Group Component
(Intercept) 0.45 0.02 0.95 0.41 0.49 20.53 Inf 0.00 fixed conditional
AnswerTruth 0.31 0.02 0.95 0.28 0.34 18.53 Inf 0.00 fixed conditional
PhrasingIndirect -0.02 0.02 0.95 -0.05 0.02 -0.93 Inf 0.35 fixed conditional
AnswerTruth:PhrasingIndirect 0.00 0.02 0.95 -0.04 0.05 0.13 Inf 0.90 fixed conditional
SD (Intercept) 0.09 0.95 0.07 0.13 random Participant conditional
SD (Intercept) 0.01 0.95 0.07 0.13 random Item conditional
SD (Observations) 0.26 0.95 0.25 0.27 random Residual conditional
# Estimated marginal means for each Answer x Phrasing cell
plot(estimate_means(model, at = c("Answer", "Phrasing")), show_data = "none")

Lies and RT

  • The faster they answer, the more confident they are in their lies.
# Beta regression requires the response strictly inside (0, 1):
# nudge exact 0s and 1s away from the boundaries
df$Confidence[df$Confidence == 1] <- 0.99999
df$Confidence[df$Confidence == 0] <- 0.00001

# Beta mixed model: Confidence as a function of RT x Answer (+ Phrasing)
model <- glmmTMB(
  Confidence ~ RT * Answer + Phrasing + (1 | Participant) + (1 | Item),
  data = df,
  family = beta_family()
)

parameters::parameters(model, effects = "fixed")
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 0.77 0.13 0.95 0.51 1.04 5.80 Inf 0.00 fixed
RT -0.25 0.03 0.95 -0.30 -0.20 -9.53 Inf 0.00 fixed
AnswerTruth -0.02 0.15 0.95 -0.31 0.26 -0.16 Inf 0.88 fixed
PhrasingIndirect 0.00 0.05 0.95 -0.11 0.10 -0.02 Inf 0.98 fixed
RT:AnswerTruth 0.29 0.03 0.95 0.23 0.35 9.03 Inf 0.00 fixed
# Predicted Confidence across RT, separately per Answer
plot(
  estimate_relation(model, at = c("RT", "Answer")),
  length = 50,
  point = list(alpha = 0.3, size = 3.5)
)

Effect of Condition

Confidence

  • Significant interaction between the condition and the answer: the effect of answer (being more confident in truths than in lies) is lower in the social condition.
  • Effect mostly driven by the social condition that increases the confidence in lies.
  • In other words: harder to lie in the polygraph condition: less obvious feedback cues?
# Beta mixed model: Confidence as a function of Answer x Condition
model <- glmmTMB(
  Confidence ~ Answer * Condition + (1 | Participant) + (1 | Item),
  data = df,
  family = beta_family()
)

parameters::parameters(model, effects = "fixed")
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) -0.34 0.08 0.95 -0.51 -0.18 -4.1 Inf 0.00 fixed
AnswerTruth 1.29 0.08 0.95 1.14 1.44 16.4 Inf 0.00 fixed
ConditionSocial 0.14 0.08 0.95 -0.01 0.29 1.8 Inf 0.07 fixed
AnswerTruth:ConditionSocial -0.22 0.11 0.95 -0.43 -0.01 -2.1 Inf 0.04 fixed
# Estimated marginal means for each Condition x Answer cell
plot(estimate_means(model, at = c("Condition", "Answer")), show_data = "none")

RT

  • Significant main effect of the condition: people are slower in the polygraph condition.
  • Consistent with the “less intuitive cues.”
# Mixed model: RT as a function of Answer x Condition
model <- glmmTMB(
  RT ~ Answer * Condition + (1 | Participant) + (1 | Item),
  data = df
)

parameters::parameters(model, effects = "fixed")
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 4.42 0.24 0.95 3.96 4.89 18.57 Inf 0.00 fixed
AnswerTruth 0.07 0.07 0.95 -0.07 0.21 1.00 Inf 0.32 fixed
ConditionSocial -0.51 0.07 0.95 -0.65 -0.37 -7.05 Inf 0.00 fixed
AnswerTruth:ConditionSocial -0.01 0.10 0.95 -0.20 0.19 -0.05 Inf 0.96 fixed
# Estimated marginal means for each Condition x Answer cell
plot(estimate_means(model, at = c("Condition", "Answer")), show_data = "none")

Theory of Mind / Empathy

Yoni Task

  • When instructed to lie, the decrease in confidence observed in participants with higher Yoni scores is smaller in the social condition than in the polygraph condition.

Correlation with LIE Scale

# Correlate two sets of scales (columns of dfsub selected by prefix) and
# return both the correlation table and its summary plot.
#   var:  prefix of the second set of columns (e.g. "YONI_")
#   var2: prefix of the first set of columns (default "LIE_")
get_correlation <- function(var = "YONI_", var2 = "LIE_") {
  r <- correlation(
    select(dfsub, starts_with(var2)),
    select(dfsub, starts_with(var)),
    p_adjust = "none"
  ) |>
    # Pretty labels: "YONI_Total" -> "YONI (Total)"
    mutate(
      Parameter1 = paste0(str_replace(Parameter1, "_", " ("), ")"),
      Parameter2 = paste0(str_replace(Parameter2, "_", " ("), ")")
    )

  p <- plot(summary(r)) + theme_minimal()

  list(r = r, plot = p)
}

# Correlation between Yoni scales and LIE scales
r <- get_correlation(var = "YONI_")
r$plot

Total Score

Confidence

# Beta mixed model: Condition x YONI_Total effects nested within each Answer
model <- glmmTMB(
  Confidence ~ Answer / (Condition * YONI_Total) + (1 | Participant) + (1 | Item),
  data = df,
  family = beta_family()
)

# Extract fixed-effect parameters and the marginal slope of the continuous
# predictor within each Answer x Condition cell.
get_parameters <- function(model) {
  # Fixed-effects table
  params <- parameters::parameters(model, effects = "fixed")

  # The trend variable is whatever conditional predictor is not a design factor
  cells <- c("Answer", "Condition")
  trend_vars <- insight::find_predictors(model)$conditional
  trend_vars <- trend_vars[!trend_vars %in% cells]
  marg <- modelbased::estimate_slopes(model, trend = trend_vars, at = cells)

  list(params = params, marginal_effects = marg)
}

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 2.70 0.95 0.95 0.84 4.56 2.8 Inf 0.00 fixed
AnswerTruth -3.67 0.89 0.95 -5.41 -1.92 -4.1 Inf 0.00 fixed
AnswerLie:ConditionSocial -2.47 0.90 0.95 -4.23 -0.71 -2.8 Inf 0.01 fixed
AnswerTruth:ConditionSocial 2.06 0.88 0.95 0.35 3.78 2.4 Inf 0.02 fixed
AnswerLie:YONI_Total -0.04 0.01 0.95 -0.06 -0.01 -3.2 Inf 0.00 fixed
AnswerTruth:YONI_Total 0.02 0.01 0.95 0.00 0.04 2.0 Inf 0.04 fixed
AnswerLie:ConditionSocial:YONI_Total 0.03 0.01 0.95 0.01 0.05 2.9 Inf 0.00 fixed
AnswerTruth:ConditionSocial:YONI_Total -0.03 0.01 0.95 -0.05 -0.01 -2.5 Inf 0.01 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie -0.04 0.01 0.95 -0.06 -0.01 -3.22 1980 0.00
Social Lie -0.01 0.01 0.95 -0.03 0.02 -0.46 1980 0.64
Polygraph Truth 0.02 0.01 0.95 0.00 0.04 2.04 1980 0.04
Social Truth 0.00 0.01 0.95 -0.02 0.02 -0.24 1980 0.81
# Plot model-predicted outcome against an individual-difference score,
# overlaying participant-level observed means (+- SD/2) per Answer x Condition.
#   model:   fitted glmmTMB model
#   var:     name (string) of the continuous predictor column in df
#   outcome: "Confidence" or "RT"
plot_effect <- function(model, var = "YONI_Total", outcome = "Confidence") {
  # Observed participant-level means and dispersion (error bars = mean +- SD/2)
  data <- df |> 
    group_by(Participant, Answer, Condition) |> 
    summarise({{var}} := mean(.data[[var]], na.rm = TRUE),
              SD = sd(.data[[outcome]], na.rm = TRUE),
              {{outcome}} := mean(.data[[outcome]], na.rm = TRUE),
              CI_low = .data[[outcome]] - SD / 2,
              CI_high = .data[[outcome]] + SD / 2) 
  
  # Horizontal dodge proportional to the predictor's observed range
  dodge_width <- 0.02 * diff(range(data[[var]]))
  ylab <- ifelse(outcome == "RT", "Reaction Time (s)", "Confidence")
  
  # Model predictions along the predictor, per Condition and Answer
  link_data <- estimate_relation(model, at = c("Condition", var, "Answer"), length = 30)

  # aes_string() is deprecated since ggplot2 3.0; use the .data pronoun
  # to map string-named columns instead.
  ggplot(link_data, aes(x = .data[[var]], y = Predicted)) +
    geom_pointrange(data = data,
                    aes(y = .data[[outcome]], color = Condition,
                        ymin = .data[["CI_low"]], ymax = .data[["CI_high"]]),
                    position = position_dodge(width = dodge_width)) +
    geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 0.33) + 
    geom_line(aes(color = Condition)) +
    labs(y = ylab, x = paste0(stringr::str_replace(var, "_", " ("), ")")) +
    scale_color_manual(values = c("Polygraph" = "#FF5722", "Social" = "#2196F3")) +
    scale_fill_manual(values = c("Polygraph" = "#FF5722", "Social" = "#2196F3")) +
    facet_wrap(~Answer) 
}

plot_effect(model, var = "YONI_Total", outcome = "Confidence")

RT

# Mixed model: RT with Condition x YONI_Total nested within Answer
model <- glmmTMB(
  RT ~ Answer / (Condition * YONI_Total) + (1 | Participant) + (1 | Item),
  data = df
)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 0.60 2.53 0.95 -4.37 5.57 0.24 Inf 0.81 fixed
AnswerTruth 2.81 0.81 0.95 1.23 4.40 3.48 Inf 0.00 fixed
AnswerLie:ConditionSocial 1.35 0.81 0.95 -0.24 2.93 1.67 Inf 0.10 fixed
AnswerTruth:ConditionSocial -1.49 0.81 0.95 -3.07 0.09 -1.85 Inf 0.06 fixed
AnswerLie:YONI_Total 0.04 0.03 0.95 -0.01 0.10 1.52 Inf 0.13 fixed
AnswerTruth:YONI_Total 0.01 0.03 0.95 -0.05 0.07 0.43 Inf 0.67 fixed
AnswerLie:ConditionSocial:YONI_Total -0.02 0.01 0.95 -0.04 0.00 -2.30 Inf 0.02 fixed
AnswerTruth:ConditionSocial:YONI_Total 0.01 0.01 0.95 -0.01 0.03 1.22 Inf 0.22 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie 0.04 0.03 0.95 -0.01 0.10 1.52 1989 0.13
Social Lie 0.02 0.03 0.95 -0.03 0.08 0.78 1989 0.44
Polygraph Truth 0.01 0.03 0.95 -0.05 0.07 0.43 1989 0.67
Social Truth 0.02 0.03 0.95 -0.03 0.08 0.82 1989 0.41
plot_effect(model, var = "YONI_Total", outcome = "RT")

BES

Total Score

LIE Scale

# Correlation between BES scales and LIE scales
r <- get_correlation(var = "BES_")
r$plot

Confidence

# Beta mixed model: Confidence with Condition x BES_Total nested within Answer
model <- glmmTMB(
  Confidence ~ Answer / (Condition * BES_Total) + (1 | Participant) + (1 | Item),
  data = df,
  family = beta_family()
)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 2.29 0.74 0.95 0.84 3.73 3.1 Inf 0.00 fixed
AnswerTruth -3.03 0.66 0.95 -4.31 -1.74 -4.6 Inf 0.00 fixed
AnswerLie:ConditionSocial -1.58 0.67 0.95 -2.91 -0.26 -2.4 Inf 0.02 fixed
AnswerTruth:ConditionSocial 1.00 0.67 0.95 -0.30 2.31 1.5 Inf 0.13 fixed
AnswerLie:BES_Total -0.03 0.01 0.95 -0.05 -0.02 -3.6 Inf 0.00 fixed
AnswerTruth:BES_Total 0.02 0.01 0.95 0.00 0.04 2.3 Inf 0.02 fixed
AnswerLie:ConditionSocial:BES_Total 0.02 0.01 0.95 0.01 0.04 2.5 Inf 0.01 fixed
AnswerTruth:ConditionSocial:BES_Total -0.01 0.01 0.95 -0.03 0.00 -1.6 Inf 0.10 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie -0.03 0.01 0.95 -0.05 -0.02 -3.57 1980 0.00
Social Lie -0.01 0.01 0.95 -0.03 0.01 -1.21 1980 0.22
Polygraph Truth 0.02 0.01 0.95 0.00 0.04 2.28 1980 0.02
Social Truth 0.01 0.01 0.95 -0.01 0.03 0.82 1980 0.41
plot_effect(model, var = "BES_Total", outcome = "Confidence")

RT

# Mixed model: RT with Condition x BES_Total nested within Answer
model <- glmmTMB(
  RT ~ Answer / (Condition * BES_Total) + (1 | Participant) + (1 | Item),
  data = df
)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 4.80 2.08 0.95 0.73 8.87 2.31 Inf 0.02 fixed
AnswerTruth 1.96 0.66 0.95 0.66 3.25 2.97 Inf 0.00 fixed
AnswerLie:ConditionSocial 1.27 0.66 0.95 -0.02 2.56 1.93 Inf 0.05 fixed
AnswerTruth:ConditionSocial -1.16 0.66 0.95 -2.45 0.13 -1.76 Inf 0.08 fixed
AnswerLie:BES_Total -0.01 0.03 0.95 -0.06 0.05 -0.18 Inf 0.85 fixed
AnswerTruth:BES_Total -0.03 0.03 0.95 -0.08 0.02 -1.10 Inf 0.27 fixed
AnswerLie:ConditionSocial:BES_Total -0.02 0.01 0.95 -0.04 -0.01 -2.72 Inf 0.01 fixed
AnswerTruth:ConditionSocial:BES_Total 0.01 0.01 0.95 -0.01 0.03 0.99 Inf 0.32 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie -0.01 0.03 0.95 -0.06 0.05 -0.18 1989 0.85
Social Lie -0.03 0.03 0.95 -0.08 0.03 -1.05 1989 0.30
Polygraph Truth -0.03 0.03 0.95 -0.08 0.02 -1.10 1989 0.27
Social Truth -0.02 0.03 0.95 -0.08 0.03 -0.78 1989 0.43
plot_effect(model, var = "BES_Total", outcome = "RT")

Interoception

Heartbeat Counting

Correlation with LIE Scale

# Correlation between HCT scores and LIE scales
r <- get_correlation(var = "HCT_")
r$plot

# plot(correlation::cor_test(dfsub, "HCT_Accuracy", "LIE_Ability"))

Accuracy

Confidence

# Beta mixed model: Confidence with Condition x HCT_Accuracy nested within Answer
model <- glmmTMB(
  Confidence ~ Answer / (Condition * HCT_Accuracy) + (1 | Participant) + (1 | Item),
  data = df,
  family = beta_family()
)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) -1.17 0.26 0.95 -1.68 -0.67 -4.6 Inf 0.00 fixed
AnswerTruth 2.76 0.23 0.95 2.30 3.22 11.9 Inf 0.00 fixed
AnswerLie:ConditionSocial 0.62 0.23 0.95 0.16 1.08 2.7 Inf 0.01 fixed
AnswerTruth:ConditionSocial -0.59 0.23 0.95 -1.04 -0.14 -2.6 Inf 0.01 fixed
AnswerLie:HCT_Accuracy 1.37 0.40 0.95 0.59 2.16 3.4 Inf 0.00 fixed
AnswerTruth:HCT_Accuracy -1.06 0.40 0.95 -1.84 -0.28 -2.7 Inf 0.01 fixed
AnswerLie:ConditionSocial:HCT_Accuracy -0.81 0.36 0.95 -1.52 -0.09 -2.2 Inf 0.03 fixed
AnswerTruth:ConditionSocial:HCT_Accuracy 0.85 0.36 0.95 0.15 1.55 2.4 Inf 0.02 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie 1.37 0.4 0.95 0.59 2.16 3.44 1980 0.00
Social Lie 0.57 0.4 0.95 -0.22 1.35 1.42 1980 0.15
Polygraph Truth -1.06 0.4 0.95 -1.84 -0.28 -2.68 1980 0.01
Social Truth -0.21 0.4 0.95 -0.99 0.57 -0.53 1980 0.59
plot_effect(model, var = "HCT_Accuracy", outcome = "Confidence")

RT

# Mixed model: RT with Condition x HCT_Accuracy nested within Answer
model <- glmmTMB(
  RT ~ Answer / (Condition * HCT_Accuracy) + (1 | Participant) + (1 | Item),
  data = df
)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 4.87 0.70 0.95 3.49 6.25 6.92 Inf 0.00 fixed
AnswerTruth -0.30 0.22 0.95 -0.74 0.13 -1.37 Inf 0.17 fixed
AnswerLie:ConditionSocial -0.51 0.22 0.95 -0.94 -0.07 -2.28 Inf 0.02 fixed
AnswerTruth:ConditionSocial -0.30 0.22 0.95 -0.73 0.13 -1.36 Inf 0.17 fixed
AnswerLie:HCT_Accuracy -0.74 1.09 0.95 -2.88 1.41 -0.67 Inf 0.50 fixed
AnswerTruth:HCT_Accuracy -0.12 1.09 0.95 -2.26 2.02 -0.11 Inf 0.92 fixed
AnswerLie:ConditionSocial:HCT_Accuracy 0.00 0.34 0.95 -0.68 0.67 -0.01 Inf 1.00 fixed
AnswerTruth:ConditionSocial:HCT_Accuracy -0.35 0.34 0.95 -1.02 0.33 -1.01 Inf 0.31 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie -0.74 1.1 0.95 -2.9 1.4 -0.67 1989 0.50
Social Lie -0.74 1.1 0.95 -2.9 1.4 -0.68 1989 0.50
Polygraph Truth -0.12 1.1 0.95 -2.3 2.0 -0.11 1989 0.92
Social Truth -0.47 1.1 0.95 -2.6 1.7 -0.43 1989 0.67
plot_effect(model, var = "HCT_Accuracy", outcome = "RT")

Interoceptive Confidence

Confidence

# Beta mixed model: Confidence with Condition x HCT_Confidence nested within Answer
model <- glmmTMB(
  Confidence ~ Answer / (Condition * HCT_Confidence) + (1 | Participant) + (1 | Item),
  data = df,
  family = beta_family()
)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) -0.90 0.17 0.95 -1.23 -0.58 -5.39 Inf 0.00 fixed
AnswerTruth 1.88 0.16 0.95 1.57 2.20 11.60 Inf 0.00 fixed
AnswerLie:ConditionSocial 0.45 0.16 0.95 0.13 0.77 2.75 Inf 0.01 fixed
AnswerTruth:ConditionSocial -0.40 0.16 0.95 -0.71 -0.08 -2.48 Inf 0.01 fixed
AnswerLie:HCT_Confidence 1.11 0.29 0.95 0.54 1.69 3.80 Inf 0.00 fixed
AnswerTruth:HCT_Confidence -0.07 0.29 0.95 -0.64 0.50 -0.25 Inf 0.80 fixed
AnswerLie:ConditionSocial:HCT_Confidence -0.63 0.29 0.95 -1.19 -0.06 -2.17 Inf 0.03 fixed
AnswerTruth:ConditionSocial:HCT_Confidence 0.63 0.28 0.95 0.08 1.19 2.24 Inf 0.03 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie 1.11 0.29 0.95 0.54 1.7 3.80 1980 0.00
Social Lie 0.49 0.30 0.95 -0.10 1.1 1.64 1980 0.10
Polygraph Truth -0.07 0.29 0.95 -0.64 0.5 -0.25 1980 0.80
Social Truth 0.56 0.29 0.95 -0.02 1.1 1.91 1980 0.06
plot_effect(model, var = "HCT_Confidence", outcome = "Confidence")

RT

# Mixed model: RT with Condition x HCT_Confidence nested within Answer
model <- glmmTMB(
  RT ~ Answer / (Condition * HCT_Confidence) + (1 | Participant) + (1 | Item),
  data = df
)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 5.41 0.45 0.95 4.53 6.28 12.08 Inf 0.00 fixed
AnswerTruth 0.03 0.15 0.95 -0.28 0.33 0.17 Inf 0.87 fixed
AnswerLie:ConditionSocial -0.71 0.15 0.95 -1.01 -0.41 -4.64 Inf 0.00 fixed
AnswerTruth:ConditionSocial -0.47 0.15 0.95 -0.77 -0.17 -3.08 Inf 0.00 fixed
AnswerLie:HCT_Confidence -1.96 0.78 0.95 -3.49 -0.43 -2.51 Inf 0.01 fixed
AnswerTruth:HCT_Confidence -1.86 0.78 0.95 -3.40 -0.33 -2.39 Inf 0.02 fixed
AnswerLie:ConditionSocial:HCT_Confidence 0.41 0.27 0.95 -0.12 0.94 1.52 Inf 0.13 fixed
AnswerTruth:ConditionSocial:HCT_Confidence -0.08 0.27 0.95 -0.61 0.45 -0.29 Inf 0.77 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie -2.0 0.78 0.95 -3.5 -0.43 -2.5 1989 0.01
Social Lie -1.6 0.78 0.95 -3.1 -0.01 -2.0 1989 0.05
Polygraph Truth -1.9 0.78 0.95 -3.4 -0.33 -2.4 1989 0.02
Social Truth -1.9 0.78 0.95 -3.5 -0.41 -2.5 1989 0.01
plot_effect(model, var = "HCT_Confidence", outcome = "RT")

Interoceptive Awareness

Confidence

# Beta mixed model: Confidence with Condition x HCT_Awareness nested within Answer
model <- glmmTMB(
  Confidence ~ Answer / (Condition * HCT_Awareness) + (1 | Participant) + (1 | Item),
  data = df,
  family = beta_family()
)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) -0.35 0.08 0.95 -0.52 -0.19 -4.15 Inf 0.00 fixed
AnswerTruth 1.31 0.08 0.95 1.16 1.46 16.82 Inf 0.00 fixed
AnswerLie:ConditionSocial 0.14 0.08 0.95 -0.01 0.29 1.83 Inf 0.07 fixed
AnswerTruth:ConditionSocial -0.09 0.08 0.95 -0.24 0.06 -1.14 Inf 0.25 fixed
AnswerLie:HCT_Awareness -0.60 0.14 0.95 -0.87 -0.33 -4.35 Inf 0.00 fixed
AnswerTruth:HCT_Awareness 0.19 0.14 0.95 -0.08 0.46 1.40 Inf 0.16 fixed
AnswerLie:ConditionSocial:HCT_Awareness 0.58 0.13 0.95 0.33 0.83 4.52 Inf 0.00 fixed
AnswerTruth:ConditionSocial:HCT_Awareness 0.00 0.13 0.95 -0.25 0.24 -0.03 Inf 0.98 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie -0.60 0.14 0.95 -0.87 -0.33 -4.35 1980 0.00
Social Lie -0.03 0.14 0.95 -0.30 0.25 -0.19 1980 0.85
Polygraph Truth 0.19 0.14 0.95 -0.08 0.46 1.40 1980 0.16
Social Truth 0.19 0.14 0.95 -0.08 0.46 1.37 1980 0.17
plot_effect(model, var = "HCT_Awareness", outcome = "Confidence")

RT

# Mixed model: RT with Condition x HCT_Awareness nested within Answer
model <- glmmTMB(
  RT ~ Answer / (Condition * HCT_Awareness) + (1 | Participant) + (1 | Item),
  data = df
)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 4.45 0.22 0.95 4.01 4.88 19.96 Inf 0.00 fixed
AnswerTruth 0.07 0.07 0.95 -0.07 0.21 0.93 Inf 0.35 fixed
AnswerLie:ConditionSocial -0.51 0.07 0.95 -0.65 -0.37 -7.16 Inf 0.00 fixed
AnswerTruth:ConditionSocial -0.51 0.07 0.95 -0.65 -0.37 -7.16 Inf 0.00 fixed
AnswerLie:HCT_Awareness 0.92 0.35 0.95 0.22 1.61 2.58 Inf 0.01 fixed
AnswerTruth:HCT_Awareness 0.70 0.35 0.95 0.01 1.40 1.98 Inf 0.05 fixed
AnswerLie:ConditionSocial:HCT_Awareness -0.32 0.12 0.95 -0.55 -0.08 -2.66 Inf 0.01 fixed
AnswerTruth:ConditionSocial:HCT_Awareness -0.08 0.12 0.95 -0.31 0.16 -0.64 Inf 0.52 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie 0.92 0.35 0.95 0.22 1.6 2.6 1989 0.01
Social Lie 0.60 0.35 0.95 -0.10 1.3 1.7 1989 0.09
Polygraph Truth 0.70 0.35 0.95 0.01 1.4 2.0 1989 0.05
Social Truth 0.63 0.35 0.95 -0.07 1.3 1.8 1989 0.08
plot_effect(model, var = "HCT_Awareness", outcome = "RT")

MAIA

Correlation with LIE Scale

# Correlation between MAIA scales and LIE scales
# (x-axis labels angled for readability: MAIA has many subscales)
r <- get_correlation(var = "MAIA_")
r$plot +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))

Total Score

Confidence

# Beta mixed model: Confidence with Condition x MAIA_Total nested within Answer
model <- glmmTMB(
  Confidence ~ Answer / (Condition * MAIA_Total) + (1 | Participant) + (1 | Item),
  data = df,
  family = beta_family()
)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) -2.39 0.40 0.95 -3.17 -1.60 -6.0 Inf 0.00 fixed
AnswerTruth 3.93 0.37 0.95 3.20 4.66 10.5 Inf 0.00 fixed
AnswerLie:ConditionSocial 1.48 0.38 0.95 0.73 2.23 3.8 Inf 0.00 fixed
AnswerTruth:ConditionSocial -0.93 0.38 0.95 -1.67 -0.19 -2.4 Inf 0.01 fixed
AnswerLie:MAIA_Total 0.75 0.14 0.95 0.47 1.04 5.2 Inf 0.00 fixed
AnswerTruth:MAIA_Total -0.22 0.14 0.95 -0.50 0.06 -1.5 Inf 0.12 fixed
AnswerLie:ConditionSocial:MAIA_Total -0.50 0.14 0.95 -0.77 -0.23 -3.6 Inf 0.00 fixed
AnswerTruth:ConditionSocial:MAIA_Total 0.31 0.14 0.95 0.05 0.58 2.3 Inf 0.02 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie 0.75 0.14 0.95 0.47 1.04 5.23 1980 0.00
Social Lie 0.26 0.15 0.95 -0.03 0.54 1.76 1980 0.08
Polygraph Truth -0.22 0.14 0.95 -0.50 0.06 -1.54 1980 0.12
Social Truth 0.09 0.15 0.95 -0.19 0.38 0.64 1980 0.52
plot_effect(model, var = "MAIA_Total", outcome = "Confidence")

RT

# Mixed model: RT with Condition x MAIA_Total nested within Answer
model <- glmmTMB(
  RT ~ Answer / (Condition * MAIA_Total) + (1 | Participant) + (1 | Item),
  data = df
)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 7.47 1.07 0.95 5.38 9.56 6.99 Inf 0.00 fixed
AnswerTruth -0.19 0.36 0.95 -0.90 0.52 -0.53 Inf 0.60 fixed
AnswerLie:ConditionSocial -2.21 0.36 0.95 -2.92 -1.50 -6.12 Inf 0.00 fixed
AnswerTruth:ConditionSocial -1.65 0.36 0.95 -2.36 -0.94 -4.57 Inf 0.00 fixed
AnswerLie:MAIA_Total -1.12 0.38 0.95 -1.87 -0.37 -2.91 Inf 0.00 fixed
AnswerTruth:MAIA_Total -1.02 0.38 0.95 -1.78 -0.27 -2.66 Inf 0.01 fixed
AnswerLie:ConditionSocial:MAIA_Total 0.63 0.13 0.95 0.37 0.88 4.82 Inf 0.00 fixed
AnswerTruth:ConditionSocial:MAIA_Total 0.42 0.13 0.95 0.16 0.67 3.22 Inf 0.00 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie -1.12 0.38 0.95 -1.9 -0.37 -2.9 1989 0.00
Social Lie -0.49 0.38 0.95 -1.2 0.26 -1.3 1989 0.20
Polygraph Truth -1.02 0.38 0.95 -1.8 -0.27 -2.7 1989 0.01
Social Truth -0.60 0.38 0.95 -1.4 0.15 -1.6 1989 0.12
plot_effect(model, var = "MAIA_Total", outcome = "RT")

Noticing

Confidence

# Beta mixed model: Confidence with Condition x MAIA_Noticing nested within Answer
model <- glmmTMB(
  Confidence ~ Answer / (Condition * MAIA_Noticing) + (1 | Participant) + (1 | Item),
  data = df,
  family = beta_family()
)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) -1.64 0.37 0.95 -2.37 -0.92 -4.4 Inf 0.00 fixed
AnswerTruth 3.44 0.32 0.95 2.81 4.08 10.7 Inf 0.00 fixed
AnswerLie:ConditionSocial 1.70 0.34 0.95 1.03 2.36 5.0 Inf 0.00 fixed
AnswerTruth:ConditionSocial -0.57 0.33 0.95 -1.22 0.08 -1.7 Inf 0.09 fixed
AnswerLie:MAIA_Noticing 0.39 0.11 0.95 0.18 0.60 3.6 Inf 0.00 fixed
AnswerTruth:MAIA_Noticing -0.25 0.11 0.95 -0.47 -0.04 -2.3 Inf 0.02 fixed
AnswerLie:ConditionSocial:MAIA_Noticing -0.47 0.10 0.95 -0.66 -0.28 -4.8 Inf 0.00 fixed
AnswerTruth:ConditionSocial:MAIA_Noticing 0.15 0.10 0.95 -0.04 0.33 1.5 Inf 0.13 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie 0.39 0.11 0.95 0.18 0.60 3.65 1980 0.00
Social Lie -0.08 0.11 0.95 -0.30 0.14 -0.70 1980 0.48
Polygraph Truth -0.25 0.11 0.95 -0.47 -0.04 -2.34 1980 0.02
Social Truth -0.11 0.11 0.95 -0.32 0.11 -0.99 1980 0.32
plot_effect(model, var = "MAIA_Noticing", outcome = "Confidence")

RT

model <- glmmTMB(RT ~ Answer / (Condition * MAIA_Noticing) + (1|Participant) + (1|Item), 
                 data = df)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 6.72 1.03 0.95 4.70 8.73 6.52 Inf 0.00 fixed
AnswerTruth -0.98 0.33 0.95 -1.64 -0.33 -2.96 Inf 0.00 fixed
AnswerLie:ConditionSocial -1.92 0.33 0.95 -2.57 -1.26 -5.76 Inf 0.00 fixed
AnswerTruth:ConditionSocial -0.75 0.33 0.95 -1.40 -0.10 -2.25 Inf 0.02 fixed
AnswerLie:MAIA_Noticing -0.68 0.30 0.95 -1.26 -0.10 -2.28 Inf 0.02 fixed
AnswerTruth:MAIA_Noticing -0.36 0.30 0.95 -0.94 0.22 -1.23 Inf 0.22 fixed
AnswerLie:ConditionSocial:MAIA_Noticing 0.42 0.10 0.95 0.23 0.60 4.34 Inf 0.00 fixed
AnswerTruth:ConditionSocial:MAIA_Noticing 0.07 0.10 0.95 -0.12 0.26 0.73 Inf 0.47 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie -0.68 0.3 0.95 -1.26 -0.10 -2.28 1989 0.02
Social Lie -0.26 0.3 0.95 -0.84 0.32 -0.88 1989 0.38
Polygraph Truth -0.36 0.3 0.95 -0.94 0.22 -1.23 1989 0.22
Social Truth -0.29 0.3 0.95 -0.87 0.29 -0.99 1989 0.32
plot_effect(model, var = "MAIA_Noticing", outcome = "RT")

Not Distracting

Confidence

model <- glmmTMB(Confidence ~ Answer / (Condition * MAIA_NotDistracting) + (1|Participant) + (1|Item), 
                 data = df, family = beta_family())

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) -0.79 0.17 0.95 -1.12 -0.45 -4.60 Inf 0.00 fixed
AnswerTruth 1.63 0.16 0.95 1.31 1.95 10.02 Inf 0.00 fixed
AnswerLie:ConditionSocial 0.32 0.16 0.95 0.01 0.64 2.00 Inf 0.05 fixed
AnswerTruth:ConditionSocial 0.00 0.16 0.95 -0.31 0.32 0.02 Inf 0.99 fixed
AnswerLie:MAIA_NotDistracting 0.27 0.09 0.95 0.09 0.45 2.95 Inf 0.00 fixed
AnswerTruth:MAIA_NotDistracting 0.06 0.09 0.95 -0.11 0.24 0.70 Inf 0.48 fixed
AnswerLie:ConditionSocial:MAIA_NotDistracting -0.11 0.09 0.95 -0.28 0.06 -1.30 Inf 0.19 fixed
AnswerTruth:ConditionSocial:MAIA_NotDistracting -0.05 0.09 0.95 -0.22 0.11 -0.63 Inf 0.53 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie 0.27 0.09 0.95 0.09 0.45 2.95 1980 0.00
Social Lie 0.16 0.09 0.95 -0.02 0.34 1.71 1980 0.09
Polygraph Truth 0.06 0.09 0.95 -0.11 0.24 0.70 1980 0.48
Social Truth 0.01 0.09 0.95 -0.17 0.19 0.11 1980 0.92
plot_effect(model, var = "MAIA_NotDistracting", outcome = "Confidence")

RT

model <- glmmTMB(RT ~ Answer / (Condition * MAIA_NotDistracting) + (1|Participant) + (1|Item), 
                 data = df)

results <- get_parameters(model)
results$params
Parameter Coefficient SE CI CI_low CI_high z df_error p Effects
(Intercept) 4.60 0.49 0.95 3.64 5.56 9.38 Inf 0.00 fixed
AnswerTruth -0.07 0.15 0.95 -0.37 0.23 -0.46 Inf 0.65 fixed
AnswerLie:ConditionSocial -0.49 0.15 0.95 -0.79 -0.19 -3.23 Inf 0.00 fixed
AnswerTruth:ConditionSocial -0.23 0.15 0.95 -0.53 0.07 -1.49 Inf 0.14 fixed
AnswerLie:MAIA_NotDistracting -0.11 0.26 0.95 -0.62 0.40 -0.41 Inf 0.68 fixed
AnswerTruth:MAIA_NotDistracting -0.02 0.26 0.95 -0.53 0.49 -0.08 Inf 0.94 fixed
AnswerLie:ConditionSocial:MAIA_NotDistracting -0.01 0.08 0.95 -0.17 0.15 -0.09 Inf 0.93 fixed
AnswerTruth:ConditionSocial:MAIA_NotDistracting -0.17 0.08 0.95 -0.34 -0.01 -2.10 Inf 0.04 fixed
results$marginal_effects
Condition Answer Coefficient SE CI CI_low CI_high t df_error p
Polygraph Lie -0.11 0.26 0.95 -0.62 0.41 -0.41 1989 0.68
Social Lie -0.12 0.26 0.95 -0.63 0.40 -0.44 1989 0.66
Polygraph Truth -0.02 0.26 0.95 -0.53 0.49 -0.08 1989 0.94
Social Truth -0.19 0.26 0.95 -0.71 0.32 -0.74 1989 0.46
plot_effect(model, var = "MAIA_NotDistracting", outcome = "RT")

Full Code

The full script of executable code contained in this document is reproduced here.

# Set up the environment (or use local alternative `source("utils/config.R")`)
# NOTE(review): sources a remote script at render time — requires network access.
source("https://raw.githubusercontent.com/RealityBending/TemplateResults/main/utils/config.R")  

library(ggplot2)
# Make see's "modern" theme the default for every ggplot below.
theme_set(see::theme_modern())
# This chunk is a bit complex so don't worry about it: it's made to add badges to the HTML versions
# NOTE: You have to replace the links accordingly to have working "buttons" on the HTML versions
if (!knitr::is_latex_output() && knitr::is_html_output()) {
  cat("![Build](https://github.com/RealityBending/TemplateResults/workflows/Build/badge.svg)
      [![Website](https://img.shields.io/badge/repo-Readme-2196F3)](https://github.com/RealityBending/TemplateResults)
      [![Website](https://img.shields.io/badge/visit-website-E91E63)](https://realitybending.github.io/TemplateResults/)
      [![Website](https://img.shields.io/badge/download-.docx-FF5722)](https://github.com/RealityBending/TemplateResults/raw/main/word_and_pdf/SupplementaryMaterials.docx)
      [![Website](https://img.shields.io/badge/see-.pdf-FF9800)](https://github.com/RealityBending/TemplateResults/blob/main/word_and_pdf/SupplementaryMaterials.pdf)")
}
library(tidyverse)
library(patchwork)
library(glmmTMB)
library(report)
library(parameters)
library(correlation)
library(modelbased)
library(performance)
library(see)

# Human-readable summary of the session (R and package versions) for reproducibility.
summary(report::report(sessionInfo()))
# setwd("C:/Users/user/Desktop/Sputnik/2019-23/DeceptionInteroTom")

# Load the combined dataset and put it into analysis-ready shape:
#   1. cast the design variables to factors,
#   2. rename columns to the labels used throughout the analyses,
#   3. recode the Answer levels and drop unused HCT columns.
# FIX(review): the original pipeline mixed `%>%` and `|>`; unified to the
# native pipe `|>` (behavior identical) for consistency.
df <- read.csv("data/data_combined.csv") |>
  mutate(ID = as.factor(paste0("S", ID)),
         condition = as.factor(condition),
         item = as.factor(item),
         style = as.factor(style),
         instruction = as.factor(instruction)) |>
  #TODO: This renaming should be done at the preprocessing stage
  rename("Participant" = "ID",
         "Condition" = "condition",
         "Item" = "item",
         "Phrasing" = "style",
         "Answer" = "instruction",
         "YONI_Total" = "yoni_total",
         "YONI_Affective" = "yoni_affective",
         "YONI_Cognitive" = "yoni_cognitive",
         "YONI_Physical" = "yoni_physical",
         "BES_Total" = "BES_total",
         "BES_Cognitive" = "BES_cognitive",
         "BES_Affective" = "BES_affective",
         "HCT_Confidence" = "HCT_confidence",
         "HCT_Accuracy" = "HCT_accuracy",
         "HCT_Awareness" = "HCT_awareness",
         "MAIA_Total" = "MAIA_total",
         "MAIA_AttentionRegulation" = "MAIA_attention_regulation",
         "MAIA_BodyListening" = "MAIA_body_listening",
         "MAIA_EmotionalAwareness" = "MAIA_emotional_awareness",
         "MAIA_NotDistracting" = "MAIA_not_distracting",
         "MAIA_NotWorrying" = "MAIA_not_worrying",
         "MAIA_Noticing" = "MAIA_noticing",
         "MAIA_SelfRegulation" = "MAIA_self_regulation",
         "MAIA_Trusting" = "MAIA_trusting",
         "LIE_Ability" = "lie_ability",
         "LIE_Frequency" = "lie_frequency",
         "LIE_Negativity" = "lie_negativity",
         "LIE_Contextuality" = "lie_contextuality",
         "Confidence" = "DT_confidence",
         "RT" = "DT_RT") |>
  mutate(Answer = fct_recode(Answer, Lie = "LIE", Truth = "TRUTH")) |>
  select(-HCT_guess, -HCT_noguess, -HCT_onebreath)

# Participant description (N, sex, age) rendered as inline text.
# NOTE(review): assumes the csv has "Gender" and "Age" columns — not visible
# here; confirm against the data file.
cat(paste("The data consists of",
          report::report_participants(df,
                                      participants = "Participant",
                                      sex = "Gender",
                                      age = "Age")))
report::cite_packages(sessionInfo())
# Helper: density of per-participant mean scores for every column sharing a
# questionnaire prefix (one facet per subscale). `fill_scale` carries the
# palette so each questionnaire keeps its original colors.
# FIX(review): replaces five near-identical pipelines with one function and
# swaps the superseded summarise_all() for across().
plot_score_density <- function(data, prefix, fill_scale) {
  data |>
    group_by(Participant) |>
    select(starts_with(prefix)) |>
    summarise(across(everything(), \(x) mean(x, na.rm = TRUE))) |>
    tidyr::pivot_longer(-Participant, values_to = "Scores") |>
    # "YONI_Total" -> "YONI (Total)" for facet labels.
    mutate(name = paste0(str_replace(name, "_", " ("), ")")) |>
    ggplot(aes(x = Scores, fill = name)) +
    geom_density() +
    fill_scale +
    facet_wrap(~name, scales = "free")
}

# YONI scores
plot_score_density(df, "YONI_",
                   scale_fill_manual(values = c("YONI (Affective)" = "Purple",
                                                "YONI (Cognitive)" = "Blue",
                                                "YONI (Physical)" = "Green",
                                                "YONI (Total)"= "DarkBlue"),
                                     guide = "none"))

# BES scores
plot_score_density(df, "BES_",
                   scale_fill_manual(values = c("BES (Affective)" = "Purple",
                                                "BES (Cognitive)" = "Blue",
                                                "BES (Total)"= "DarkBlue"),
                                     guide = "none"))

# HCT scores
plot_score_density(df, "HCT_",
                   scale_fill_manual(values = c("HCT (Accuracy)" = "Red",
                                                "HCT (Awareness)" = "Orange",
                                                "HCT (Confidence)"= "DarkOrange"),
                                     guide = "none"))

# MAIA scores
plot_score_density(df, "MAIA_",
                   scale_fill_brewer(palette = "Reds", guide = "none"))

# LIE scores
plot_score_density(df, "LIE_",
                   scale_fill_manual(values = c("LIE (Ability)" = "#2196F3",
                                                "LIE (Frequency)" = "#4CAF50",
                                                "LIE (Contextuality)"= "#FF9800",
                                                "LIE (Negativity)"= "#E91E63"),
                                     guide = "none"))

# Per-participant descriptives ("mean +- SD") of Confidence and RT by Answer.
# FIX(review): the original used paste() (default sep = " ") around the
# literal " +- ", producing doubled spaces ("M  +-  SD"); paste0() with a
# single-spaced separator yields the intended "M +- SD".
df |>
  group_by(Participant, Answer) |>
  summarise(Confidence = paste0(insight::format_value(mean(Confidence, na.rm = TRUE)),
                                " +- ",
                                insight::format_value(sd(Confidence, na.rm = TRUE))),
            RT = paste0(insight::format_value(mean(RT, na.rm = TRUE)),
                        " +- ",
                        insight::format_value(sd(RT, na.rm = TRUE)))) |>
  arrange(Participant) |>
  knitr::kable()

# Exclude participants flagged above.
df <- df |> 
  dplyr::filter(Participant != "S9",  # Extreme answers
                !Participant %in% c("S3", "S15", "S19", "S23"))  # No data
# Per-participant distribution of Confidence, split by Answer
# (S29 is excluded from the display only, not from the data).
p_confidence <- df |>
  filter(!Participant %in% c("S29")) |>
  ggplot(aes(x = Confidence, fill = Participant)) +
  geom_density(alpha = 0.1) +
  see::scale_fill_material_d(palette = "rainbow", guide = "none") +
  see::theme_modern() +
  scale_x_continuous(labels = scales::percent, expand = expansion(c(0, .05))) +
  scale_y_continuous(expand = expansion(c(0, .05))) +
  facet_wrap(~Answer)

# Same display for reaction times.
p_reaction <- df |>
  filter(!Participant %in% c("S29")) |>
  ggplot(aes(x = RT, fill = Participant)) +
  geom_density(alpha = 0.1) +
  see::scale_fill_material_d(palette = "rainbow", guide = "none") +
  scale_x_continuous(expand = expansion(c(0, .05))) +
  scale_y_continuous(expand = expansion(c(0, .05))) +
  facet_wrap(~Answer)

# Stack the two panels vertically (patchwork operator).
p_confidence / p_reaction
# One row per participant holding their questionnaire scores, obtained by
# averaging across trials (scores are presumably constant within participant,
# so the mean just collapses them — TODO confirm against the data).
# FIX(review): summarise_all() is superseded; use summarise(across()) instead.
dfsub <- df |>
  select(Participant,
         starts_with("YONI_"),
         starts_with("BES_"),
         starts_with("HCT_"),
         starts_with("MAIA_"),
         starts_with("LIE_")) |>
  group_by(Participant) |>
  summarise(across(everything(), mean))
# YONI vs. BES correlation matrix, unadjusted p-values.
r <- correlation(select(dfsub, starts_with("YONI_")),
                 select(dfsub, starts_with("BES_")),
                 p_adjust = "none")
plot(summary(r))

# MAIA vs. HCT correlation matrix.
r <- correlation(select(dfsub, starts_with("MAIA_")),
                 select(dfsub, starts_with("HCT_")),
                 p_adjust = "none")
plot(summary(r))

# Interoception scales (MAIA, HCT) vs. social-cognition scales (YONI, BES).
r <- correlation(select(dfsub, starts_with(c("MAIA_", "HCT_"))),
                 select(dfsub, starts_with(c("YONI_", "BES_"))),
                 p_adjust = "none")
plot(summary(r))
# RT as a function of Answer x Phrasing (Gaussian mixed model with random
# intercepts for participants and items).
model <- glmmTMB(RT ~ Answer * Phrasing + (1|Participant) + (1|Item), data = df)

parameters::parameters(model, effects = "fixed")
estimate_means(model, at = c("Answer", "Phrasing")) |>
  plot(show_data = "none")

# Confidence as a function of Answer x Phrasing.
model <- glmmTMB(Confidence ~ Answer * Phrasing + (1|Participant) + (1|Item), data = df)

# FIX(review): every sibling call displays fixed effects only; added
# effects = "fixed" here for consistency (the original also printed the
# random-effect parameters).
parameters::parameters(model, effects = "fixed")
estimate_means(model, at = c("Answer", "Phrasing")) |>
  plot(show_data = "none")
# Beta-family models need the response strictly inside (0, 1): nudge exact
# zeros and ones away from the boundaries.
df$Confidence <- replace(df$Confidence, df$Confidence == 1, 0.99999)
df$Confidence <- replace(df$Confidence, df$Confidence == 0, 0.00001)

# Confidence as a function of RT x Answer (plus Phrasing), beta model.
model <- glmmTMB(Confidence ~ RT * Answer + Phrasing + (1|Participant) + (1|Item),
                 data = df, family = beta_family())

parameters::parameters(model, effects = "fixed")
plot(estimate_relation(model, at = c("RT", "Answer")),
     length = 50, point = list(alpha = 0.3, size = 3.5))

# Confidence as a function of Answer x Condition, beta model.
model <- glmmTMB(Confidence ~ Answer * Condition + (1|Participant) + (1|Item),
                 data = df, family = beta_family())

parameters::parameters(model, effects = "fixed")
plot(estimate_means(model, at = c("Condition", "Answer")), show_data = "none")

# RT as a function of Answer x Condition.
model <- glmmTMB(RT ~ Answer * Condition + (1|Participant) + (1|Item),
                 data = df)

parameters::parameters(model, effects = "fixed")
plot(estimate_means(model, at = c("Condition", "Answer")), show_data = "none")
# Correlate two questionnaire families from `dfsub` (columns selected by
# prefix), prettifying the labels ("YONI_Total" -> "YONI (Total)").
# Returns a list with the correlation table (`r`) and its plot (`plot`).
# NOTE(review): relies on the global `dfsub` built above.
get_correlation <- function(var = "YONI_", var2 = "LIE_") {
  cors <- correlation(
    select(dfsub, starts_with(var2)),
    select(dfsub, starts_with(var)),
    p_adjust = "none"
  )
  cors <- mutate(
    cors,
    Parameter1 = paste0(str_replace(Parameter1, "_", " ("), ")"),
    Parameter2 = paste0(str_replace(Parameter2, "_", " ("), ")")
  )

  figure <- plot(summary(cors)) + theme_minimal()
  list(r = cors, plot = figure)
}

# Theory of Mind (YONI) scores correlated with the LIE scales.
r <- get_correlation(var = "YONI_")
r$plot
# Confidence ~ Condition x YONI_Total nested within each Answer (beta model).
model <- glmmTMB(Confidence ~ Answer / (Condition * YONI_Total) + (1|Participant) + (1|Item), 
                 data = df, family = beta_family())

# Extract the fixed-effect table and the simple slopes of the continuous
# moderator within each Answer x Condition cell of `model`.
# Returns a list with `params` (fixed effects) and `marginal_effects` (slopes).
get_parameters <- function(model) {
  fixed <- parameters::parameters(model, effects = "fixed")
  # The moderator is whatever conditional predictor is not a grouping factor.
  groups <- c("Answer", "Condition")
  predictors <- insight::find_predictors(model)$conditional
  moderator <- setdiff(predictors, groups)
  slopes <- modelbased::estimate_slopes(model, trend = moderator, at = groups)
  list(params = fixed, marginal_effects = slopes)
}

# Fixed-effect table and the moderator's simple slopes per Answer x Condition.
results <- get_parameters(model)
results$params
results$marginal_effects
# Plot observed per-participant means (+- SD/2 as error bars) overlaid on the
# model-predicted relation between `var` and `outcome`, split by Answer and
# colored by Condition.
#
# Args:
#   model:   fitted glmmTMB model containing `var` among its predictors.
#   var:     column name (string) of the individual-difference score (x-axis).
#   outcome: column name (string) of the response ("Confidence" or "RT").
# NOTE(review): relies on the global `df` built above.
plot_effect <- function(model, var = "YONI_Total", outcome = "Confidence") {
  data <- df |> 
    group_by(Participant, Answer, Condition) |> 
    summarise({{var}} := mean(.data[[var]], na.rm = TRUE),
              SD = sd(.data[[outcome]], na.rm = TRUE),
              {{outcome}} := mean(.data[[outcome]], na.rm = TRUE),
              CI_low = .data[[outcome]] - SD / 2,
              CI_high = .data[[outcome]] + SD / 2) 
  
  # Dodge points horizontally by 2% of the x-range so conditions don't overlap.
  dodge_width <- 0.02 * diff(range(data[[var]]))
  # Scalar condition: plain if/else instead of ifelse().
  ylab <- if (outcome == "RT") "Reaction Time (s)" else "Confidence"
  
  link_data <- estimate_relation(model, at = c("Condition", var, "Answer"), length = 30)
  # FIX(review): aes_string() is deprecated (ggplot2 >= 3.0); use aes() with
  # the .data pronoun for string column names instead.
  ggplot(link_data, aes(x = .data[[var]], y = Predicted)) +
    geom_pointrange(data = data,
                    aes(y = .data[[outcome]], color = Condition,
                        ymin = CI_low, ymax = CI_high),
                    position = position_dodge(width = dodge_width)) +
    geom_ribbon(aes(ymin = CI_low, ymax = CI_high, fill = Condition), alpha = 0.33) + 
    geom_line(aes(color = Condition)) +
    labs(y = ylab, x = paste0(stringr::str_replace(var, "_", " ("), ")")) +
    scale_color_manual(values = c("Polygraph" = "#FF5722", "Social" = "#2196F3")) +
    scale_fill_manual(values = c("Polygraph" = "#FF5722", "Social" = "#2196F3")) +
    facet_wrap(~Answer) 
}

plot_effect(model, var = "YONI_Total", outcome = "Confidence")
# Theory of Mind (YONI): RT moderated by Condition x YONI_Total within Answer.
model <- glmmTMB(RT ~ Answer / (Condition * YONI_Total) + (1|Participant) + (1|Item), 
                 data = df)

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "YONI_Total", outcome = "RT")
# Empathy (BES): correlations with the LIE scales.
r <- get_correlation(var = "BES_")
r$plot
# Empathy (BES): Confidence moderation (beta model).
model <- glmmTMB(Confidence ~ Answer / (Condition * BES_Total) + (1|Participant) + (1|Item), 
                 data = df, family = beta_family())

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "BES_Total", outcome = "Confidence")
# Empathy (BES): RT moderation.
model <- glmmTMB(RT ~ Answer / (Condition * BES_Total) + (1|Participant) + (1|Item), 
                 data = df)

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "BES_Total", outcome = "RT")
# HCT: correlations with the LIE scales.
r <- get_correlation(var = "HCT_")
r$plot

# plot(correlation::cor_test(dfsub, "HCT_Accuracy", "LIE_Ability"))
# HCT Accuracy: Confidence moderation (beta model).
model <- glmmTMB(Confidence ~ Answer / (Condition * HCT_Accuracy) + (1|Participant) + (1|Item), 
                 data = df, family = beta_family())

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "HCT_Accuracy", outcome = "Confidence")
# HCT Accuracy: RT moderation.
model <- glmmTMB(RT ~ Answer / (Condition * HCT_Accuracy) + (1|Participant) + (1|Item), 
                 data = df)

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "HCT_Accuracy", outcome = "RT")
# HCT Confidence: Confidence moderation (beta model).
model <- glmmTMB(Confidence ~ Answer / (Condition * HCT_Confidence) + (1|Participant) + (1|Item), 
                 data = df, family = beta_family())

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "HCT_Confidence", outcome = "Confidence")
# HCT Confidence: RT moderation.
model <- glmmTMB(RT ~ Answer / (Condition * HCT_Confidence) + (1|Participant) + (1|Item), 
                 data = df)

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "HCT_Confidence", outcome = "RT")
# HCT Awareness: Confidence moderation (beta model).
model <- glmmTMB(Confidence ~ Answer / (Condition * HCT_Awareness) + (1|Participant) + (1|Item), 
                 data = df, family = beta_family())

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "HCT_Awareness", outcome = "Confidence")
# HCT Awareness: RT moderation.
model <- glmmTMB(RT ~ Answer / (Condition * HCT_Awareness) + (1|Participant) + (1|Item), 
                 data = df)

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "HCT_Awareness", outcome = "RT")
# MAIA: correlations with the LIE scales (angled x-labels for readability).
r <- get_correlation(var = "MAIA_")
r$plot +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust=1))
# MAIA Total: Confidence moderation (beta model).
model <- glmmTMB(Confidence ~ Answer / (Condition * MAIA_Total) + (1|Participant) + (1|Item), 
                 data = df, family = beta_family())

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "MAIA_Total", outcome = "Confidence")
# MAIA Total: RT moderation.
model <- glmmTMB(RT ~ Answer / (Condition * MAIA_Total) + (1|Participant) + (1|Item), 
                 data = df)

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "MAIA_Total", outcome = "RT")
# MAIA Noticing: Confidence moderation (beta model).
model <- glmmTMB(Confidence ~ Answer / (Condition * MAIA_Noticing) + (1|Participant) + (1|Item), 
                 data = df, family = beta_family())

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "MAIA_Noticing", outcome = "Confidence")
# MAIA Noticing: RT moderation.
model <- glmmTMB(RT ~ Answer / (Condition * MAIA_Noticing) + (1|Participant) + (1|Item), 
                 data = df)

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "MAIA_Noticing", outcome = "RT")
# MAIA Not-Distracting: Confidence moderation (beta model).
model <- glmmTMB(Confidence ~ Answer / (Condition * MAIA_NotDistracting) + (1|Participant) + (1|Item), 
                 data = df, family = beta_family())

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "MAIA_NotDistracting", outcome = "Confidence")
# MAIA Not-Distracting: RT moderation.
model <- glmmTMB(RT ~ Answer / (Condition * MAIA_NotDistracting) + (1|Participant) + (1|Item), 
                 data = df)

results <- get_parameters(model)
results$params
results$marginal_effects
plot_effect(model, var = "MAIA_NotDistracting", outcome = "RT")

Package References

report::cite_packages(sessionInfo())
  • Ben-Shachar M, Lüdecke D, Makowski D (2020). effectsize: Estimation of Effect Size Indices and Standardized Parameters. Journal of Open Source Software, 5(56), 2815. doi: 10.21105/joss.02815
  • H. Wickham. ggplot2: Elegant Graphics for Data Analysis. Springer-Verlag New York, 2016.
  • Hadley Wickham (2019). stringr: Simple, Consistent Wrappers for Common String Operations. R package version 1.4.0. https://CRAN.R-project.org/package=stringr
  • Hadley Wickham (2021). forcats: Tools for Working with Categorical Variables (Factors). R package version 0.5.1. https://CRAN.R-project.org/package=forcats
  • Hadley Wickham (2021). tidyr: Tidy Messy Data. R package version 1.1.4. https://CRAN.R-project.org/package=tidyr
  • Hadley Wickham and Jim Hester (2021). readr: Read Rectangular Text Data. R package version 2.0.2. https://CRAN.R-project.org/package=readr
  • Hadley Wickham, Romain François, Lionel Henry and Kirill Müller (2021). dplyr: A Grammar of Data Manipulation. R package version 1.0.7. https://CRAN.R-project.org/package=dplyr
  • Kirill Müller and Hadley Wickham (2021). tibble: Simple Data Frames. R package version 3.1.4. https://CRAN.R-project.org/package=tibble
  • Lionel Henry and Hadley Wickham (2020). purrr: Functional Programming Tools. R package version 0.3.4. https://CRAN.R-project.org/package=purrr
  • Lüdecke D, Ben-Shachar M, Patil I, Makowski D (2020). “Extracting, Computing and Exploring the Parameters of Statistical Models using R.” Journal of Open Source Software, 5(53), 2445. doi: 10.21105/joss.02445 (URL: https://doi.org/10.21105/joss.02445).
  • Lüdecke D, Waggoner P, Makowski D (2019). “insight: A Unified Interface to Access Information from Model Objects in R.” Journal of Open Source Software, 4(38), 1412. doi: 10.21105/joss.01412 (URL: https://doi.org/10.21105/joss.01412).
  • Lüdecke et al., (2021). performance: An R Package for Assessment, Comparison and Testing of Statistical Models. Journal of Open Source Software, 6(60), 3139. https://doi.org/10.21105/joss.03139
  • Lüdecke et al., (2021). see: An R Package for Visualizing Statistical Models. Journal of Open Source Software, 6(64), 3393. https://doi.org/10.21105/joss.03393
  • Makowski, D., Ben-Shachar, M. S. & Lüdecke, D. (2020). The {easystats} collection of R packages. GitHub.
  • Makowski, D., Ben-Shachar, M. S., Patil, I., & Lüdecke, D. (2019). Methods and Algorithms for Correlation Analysis in R. Journal of Open Source Software, 5(51), 2306. doi:10.21105/joss.02306
  • Makowski, D., Ben-Shachar, M. S., Patil, I., & Lüdecke, D. (2020). Estimation of Model-Based Predictions, Contrasts and Means. CRAN.
  • Makowski, D., Ben-Shachar, M., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. doi:10.21105/joss.01541
  • Makowski, D., Ben-Shachar, M.S., Patil, I. & Lüdecke, D. (2020). Automated Results Reporting as a Practical Tool to Improve Reproducibility and Methodological Best Practices Adoption. CRAN. Available from https://github.com/easystats/report.
  • Makowski, Lüdecke, Patil, Ben-Shachar, & Wiernik (2021). datawizard: Easy Data Wrangling. CRAN. Available from https://easystats.github.io/datawizard/
  • Mollie E. Brooks, Kasper Kristensen, Koen J. van Benthem, Arni Magnusson, Casper W. Berg, Anders Nielsen, Hans J. Skaug, Martin Maechler and Benjamin M. Bolker (2017). glmmTMB Balances Speed and Flexibility Among Packages for Zero-inflated Generalized Linear Mixed Modeling. The R Journal, 9(2), 378-400.
  • R Core Team (2021). R: A language and environment for statistical computing. R Foundation for Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.
  • Thomas Lin Pedersen (2020). patchwork: The Composer of Plots. R package version 1.1.1. https://CRAN.R-project.org/package=patchwork
  • Wickham et al., (2019). Welcome to the tidyverse. Journal of Open Source Software, 4(43), 1686, https://doi.org/10.21105/joss.01686

References

LS0tDQp0aXRsZTogJyoqVGhlIFBsYWNlIG9mIEludGVyb2NlcHRpb24gYW5kIFRoZW9yeSBvZiBNaW5kIGluIERlY2VwdGlvbioqJw0Kc3VidGl0bGU6IEEgU3VidGl0bGUNCm91dHB1dDoNCiAgaHRtbF9kb2N1bWVudDoNCiAgICB0aGVtZTogY2VydWxlYW4NCiAgICBoaWdobGlnaHQ6IHB5Z21lbnRzDQogICAgdG9jOiB5ZXMNCiAgICB0b2NfZGVwdGg6IDMNCiAgICB0b2NfZmxvYXQ6IHllcw0KICAgIG51bWJlcl9zZWN0aW9uczogbm8NCiAgICBkZl9wcmludDoga2FibGUNCiAgICBjb2RlX2ZvbGRpbmc6IGhpZGUNCiAgICBjb2RlX2Rvd25sb2FkOiB5ZXMNCiAgd29yZF9kb2N1bWVudDoNCiAgICByZWZlcmVuY2VfZG9jeDogdXRpbHMvVGVtcGxhdGVfV29yZC5kb2N4DQogICAgaGlnaGxpZ2h0OiBweWdtZW50cw0KICAgIHRvYzogbm8NCiAgICB0b2NfZGVwdGg6IDMNCiAgICBkZl9wcmludDoga2FibGUNCiAgICBudW1iZXJfc2VjdGlvbnM6IHllcw0KICBybWFya2Rvd246Omh0bWxfdmlnbmV0dGU6DQogICAgdG9jOiB5ZXMNCiAgICB0b2NfZGVwdGg6IDMNCiAgcGRmX2RvY3VtZW50Og0KICAgIHRvYzogeWVzDQogICAgdG9jX2RlcHRoOiAnMicNCiAgICBsYXRleF9lbmdpbmU6IHhlbGF0ZXgNCmVkaXRvcl9vcHRpb25zOg0KICBjaHVua19vdXRwdXRfdHlwZTogY29uc29sZQ0KYmlibGlvZ3JhcGh5OiB1dGlscy9iaWJsaW9ncmFwaHkuYmliDQpjc2w6IHV0aWxzL2FwYS5jc2wNCi0tLQ0KDQoNCjwhLS0gDQohISEhIElNUE9SVEFOVDogcnVuIGBzb3VyY2UoInV0aWxzL3JlbmRlci5SIilgIHRvIHB1Ymxpc2ggaW5zdGVhZCBvZiBjbGlja2luZyBvbiAnS25pdCcNCi0tPg0KDQpgYGB7ciBzZXR1cCwgd2FybmluZz1GQUxTRSwgbWVzc2FnZT1UUlVFLCBpbmNsdWRlPUZBTFNFfQ0KIyBTZXQgdXAgdGhlIGVudmlyb25tZW50IChvciB1c2UgbG9jYWwgYWx0ZXJuYXRpdmUgYHNvdXJjZSgidXRpbHMvY29uZmlnLlIiKWApDQpzb3VyY2UoImh0dHBzOi8vcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbS9SZWFsaXR5QmVuZGluZy9UZW1wbGF0ZVJlc3VsdHMvbWFpbi91dGlscy9jb25maWcuUiIpICANCg0KbGlicmFyeShnZ3Bsb3QyKQ0KdGhlbWVfc2V0KHNlZTo6dGhlbWVfbW9kZXJuKCkpDQpgYGANCg0KIyBJbnRyb2R1Y3Rpb24NCg0KYGBge3IgYmFkZ2VzLCBlY2hvPUZBTFNFLCBtZXNzYWdlPVRSVUUsIHdhcm5pbmc9RkFMU0UsIHJlc3VsdHM9J2FzaXMnfQ0KIyBUaGlzIGNodW5rIGlzIGEgYml0IGNvbXBsZXggc28gZG9uJ3Qgd29ycnkgYWJvdXQgaXQ6IGl0J3MgbWFkZSB0byBhZGQgYmFkZ2VzIHRvIHRoZSBIVE1MIHZlcnNpb25zDQojIE5PVEU6IFlvdSBoYXZlIHRvIHJlcGxhY2UgdGhlIGxpbmtzIGFjY29yZGluZ2x5IHRvIGhhdmUgd29ya2luZyAiYnV0dG9ucyIgb24gdGhlIEhUTUwgdmVyc2lvbnMNCmlmICgha25pdHI6OmlzX2xhdGV4X291dHB1dCgpICYmIGtuaXRyOjppc19odG1sX291dHB1dCgpKSB7DQogIGNhdCgiIVtCdWls
ZF0oaHR0cHM6Ly9naXRodWIuY29tL1JlYWxpdHlCZW5kaW5nL1RlbXBsYXRlUmVzdWx0cy93b3JrZmxvd3MvQnVpbGQvYmFkZ2Uuc3ZnKQ0KICAgICAgWyFbV2Vic2l0ZV0oaHR0cHM6Ly9pbWcuc2hpZWxkcy5pby9iYWRnZS9yZXBvLVJlYWRtZS0yMTk2RjMpXShodHRwczovL2dpdGh1Yi5jb20vUmVhbGl0eUJlbmRpbmcvVGVtcGxhdGVSZXN1bHRzKQ0KICAgICAgWyFbV2Vic2l0ZV0oaHR0cHM6Ly9pbWcuc2hpZWxkcy5pby9iYWRnZS92aXNpdC13ZWJzaXRlLUU5MUU2MyldKGh0dHBzOi8vcmVhbGl0eWJlbmRpbmcuZ2l0aHViLmlvL1RlbXBsYXRlUmVzdWx0cy8pDQogICAgICBbIVtXZWJzaXRlXShodHRwczovL2ltZy5zaGllbGRzLmlvL2JhZGdlL2Rvd25sb2FkLS5kb2N4LUZGNTcyMildKGh0dHBzOi8vZ2l0aHViLmNvbS9SZWFsaXR5QmVuZGluZy9UZW1wbGF0ZVJlc3VsdHMvcmF3L21haW4vd29yZF9hbmRfcGRmL1N1cHBsZW1lbnRhcnlNYXRlcmlhbHMuZG9jeCkNCiAgICAgIFshW1dlYnNpdGVdKGh0dHBzOi8vaW1nLnNoaWVsZHMuaW8vYmFkZ2Uvc2VlLS5wZGYtRkY5ODAwKV0oaHR0cHM6Ly9naXRodWIuY29tL1JlYWxpdHlCZW5kaW5nL1RlbXBsYXRlUmVzdWx0cy9ibG9iL21haW4vd29yZF9hbmRfcGRmL1N1cHBsZW1lbnRhcnlNYXRlcmlhbHMucGRmKSIpDQp9DQpgYGANCg0KDQo8IS0tIFRpdGxlIGlkZWFzOi0tPg0KPCEtLSBUaGVvcnkgb2YgQm9keSBhbmQgT3RoZXJzIC0tPg0KDQojIFBhY2thZ2VzICYgRGF0YQ0KDQojIyBQYWNrYWdlcw0KDQpUaGlzIGRvY3VtZW50IHdhcyBwcmVwYXJlZCBvbiBgciBmb3JtYXQoU3lzLkRhdGUoKSlgLiANCg0KYGBge3Igd2FybmluZz1GQUxTRSwgbWVzc2FnZT1UUlVFLCByZXN1bHRzPSdhc2lzJ30NCmxpYnJhcnkodGlkeXZlcnNlKQ0KbGlicmFyeShwYXRjaHdvcmspDQpsaWJyYXJ5KGdsbW1UTUIpDQpsaWJyYXJ5KHJlcG9ydCkNCmxpYnJhcnkocGFyYW1ldGVycykNCmxpYnJhcnkoY29ycmVsYXRpb24pDQpsaWJyYXJ5KG1vZGVsYmFzZWQpDQpsaWJyYXJ5KHBlcmZvcm1hbmNlKQ0KbGlicmFyeShzZWUpDQoNCnN1bW1hcnkocmVwb3J0OjpyZXBvcnQoc2Vzc2lvbkluZm8oKSkpDQpgYGANCg0KDQojIyBEYXRhDQoNCmBgYHtyIHdhcm5pbmc9RkFMU0UsIG1lc3NhZ2U9VFJVRSwgcmVzdWx0cz0nYXNpcyd9DQojIHNldHdkKCJDOi9Vc2Vycy91c2VyL0Rlc2t0b3AvU3B1dG5pay8yMDE5LTIzL0RlY2VwdGlvbkludGVyb1RvbSIpDQoNCmRmIDwtIHJlYWQuY3N2KCJkYXRhL2RhdGFfY29tYmluZWQuY3N2IikgJT4lIA0KICBtdXRhdGUoSUQgPSBhcy5mYWN0b3IocGFzdGUwKCJTIiwgSUQpKSwNCiAgICAgICAgIGNvbmRpdGlvbiA9IGFzLmZhY3Rvcihjb25kaXRpb24pLA0KICAgICAgICAgaXRlbSA9IGFzLmZhY3RvcihpdGVtKSwNCiAgICAgICAgIHN0eWxlID0gYXMuZmFjdG9yKHN0eWxlKSwNCiAgICAgICAgIGluc3RydWN0aW9uID0gYXMuZmFjdG9yKGluc3RydWN0aW9uKSkgfD4gDQog
ICNUT0RPOiBUaGlzIHJlbmFtaW5nIHNob3VsZCBiZSBkb25lIGF0IHRoZSBwcmVwcm9jZXNzaW5nIHN0YWdlDQogIHJlbmFtZSgiUGFydGljaXBhbnQiID0gIklEIiwNCiAgICAgICAgICJDb25kaXRpb24iID0gImNvbmRpdGlvbiIsDQogICAgICAgICAiSXRlbSIgPSAiaXRlbSIsDQogICAgICAgICAiUGhyYXNpbmciID0gInN0eWxlIiwNCiAgICAgICAgICJBbnN3ZXIiID0gImluc3RydWN0aW9uIiwNCiAgICAgICAgICJZT05JX1RvdGFsIiA9ICJ5b25pX3RvdGFsIiwNCiAgICAgICAgICJZT05JX0FmZmVjdGl2ZSIgPSAieW9uaV9hZmZlY3RpdmUiLA0KICAgICAgICAgIllPTklfQ29nbml0aXZlIiA9ICJ5b25pX2NvZ25pdGl2ZSIsDQogICAgICAgICAiWU9OSV9QaHlzaWNhbCIgPSAieW9uaV9waHlzaWNhbCIsDQogICAgICAgICAiQkVTX1RvdGFsIiA9ICJCRVNfdG90YWwiLA0KICAgICAgICAgIkJFU19Db2duaXRpdmUiID0gIkJFU19jb2duaXRpdmUiLA0KICAgICAgICAgIkJFU19BZmZlY3RpdmUiID0gIkJFU19hZmZlY3RpdmUiLA0KICAgICAgICAgIkhDVF9Db25maWRlbmNlIiA9ICJIQ1RfY29uZmlkZW5jZSIsDQogICAgICAgICAiSENUX0FjY3VyYWN5IiA9ICJIQ1RfYWNjdXJhY3kiLA0KICAgICAgICAgIkhDVF9Bd2FyZW5lc3MiID0gIkhDVF9hd2FyZW5lc3MiLA0KICAgICAgICAgIk1BSUFfVG90YWwiID0gIk1BSUFfdG90YWwiLA0KICAgICAgICAgIk1BSUFfQXR0ZW50aW9uUmVndWxhdGlvbiIgPSAiTUFJQV9hdHRlbnRpb25fcmVndWxhdGlvbiIsDQogICAgICAgICAiTUFJQV9Cb2R5TGlzdGVuaW5nIiA9ICJNQUlBX2JvZHlfbGlzdGVuaW5nIiwNCiAgICAgICAgICJNQUlBX0Vtb3Rpb25hbEF3YXJlbmVzcyIgPSAiTUFJQV9lbW90aW9uYWxfYXdhcmVuZXNzIiwNCiAgICAgICAgICJNQUlBX05vdERpc3RyYWN0aW5nIiA9ICJNQUlBX25vdF9kaXN0cmFjdGluZyIsDQogICAgICAgICAiTUFJQV9Ob3RXb3JyeWluZyIgPSAiTUFJQV9ub3Rfd29ycnlpbmciLA0KICAgICAgICAgIk1BSUFfTm90aWNpbmciID0gIk1BSUFfbm90aWNpbmciLA0KICAgICAgICAgIk1BSUFfU2VsZlJlZ3VsYXRpb24iID0gIk1BSUFfc2VsZl9yZWd1bGF0aW9uIiwNCiAgICAgICAgICJNQUlBX1RydXN0aW5nIiA9ICJNQUlBX3RydXN0aW5nIiwNCiAgICAgICAgICJMSUVfQWJpbGl0eSIgPSAibGllX2FiaWxpdHkiLA0KICAgICAgICAgIkxJRV9GcmVxdWVuY3kiID0gImxpZV9mcmVxdWVuY3kiLA0KICAgICAgICAgIkxJRV9OZWdhdGl2aXR5IiA9ICJsaWVfbmVnYXRpdml0eSIsDQogICAgICAgICAiTElFX0NvbnRleHR1YWxpdHkiID0gImxpZV9jb250ZXh0dWFsaXR5IiwNCiAgICAgICAgICJDb25maWRlbmNlIiA9ICJEVF9jb25maWRlbmNlIiwNCiAgICAgICAgICJSVCIgPSAiRFRfUlQiKSB8PiANCiAgbXV0YXRlKEFuc3dlciA9IGZjdF9yZWNvZGUoQW5zd2VyLCBMaWUgPSAiTElFIiwgVHJ1dGggPSAiVFJVVEgiKSkgfD4gDQogIHNlbGVjdCgtSENU
X2d1ZXNzLCAtSENUX25vZ3Vlc3MsIC1IQ1Rfb25lYnJlYXRoKQ0KDQpjYXQocGFzdGUoIlRoZSBkYXRhIGNvbnNpc3RzIG9mIiwNCiAgICAgICAgICByZXBvcnQ6OnJlcG9ydF9wYXJ0aWNpcGFudHMoZGYsDQogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIHBhcnRpY2lwYW50cyA9ICJQYXJ0aWNpcGFudCIsDQogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIHNleCA9ICJHZW5kZXIiLA0KICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICBhZ2UgPSAiQWdlIikpKQ0KYGBgDQoNCg0KDQoNCiMgTWVhc3VyZXMgey50YWJzZXR9DQoNCmBgYHtyIGNoaWxkPScxX01lYXN1cmVzLlJtZCd9DQpgYGANCg0KDQojIE1hbmlwdWxhdGlvbiBDaGVja3MNCg0KDQpgYGB7ciBjaGlsZD0nMl9NYW5pcHVsYXRpb25DaGVja3MuUm1kJ30NCmBgYA0KDQojIFRoZW9yeSBvZiBNaW5kIC8gRW1wYXRoeQ0KDQpgYGB7ciBjaGlsZD0nM19Ub00uUm1kJ30NCmBgYA0KDQojIEludGVyb2NlcHRpb24NCg0KYGBge3IgY2hpbGQ9JzRfSW50ZXJvLlJtZCd9DQpgYGANCg0KDQojIEZ1bGwgQ29kZQ0KDQpUaGUgZnVsbCBzY3JpcHQgb2YgZXhlY3V0aXZlIGNvZGUgY29udGFpbmVkIGluIHRoaXMgZG9jdW1lbnQgaXMgcmVwcm9kdWNlZCBoZXJlLg0KDQpgYGB7ciBmdWxsX2NvZGUsIHJlZi5sYWJlbD1rbml0cjo6YWxsX2xhYmVscygpLCBldmFsPUZBTFNFfQ0KYGBgDQoNCiMgUGFja2FnZSBSZWZlcmVuY2VzDQoNCmBgYHtyIHdhcm5pbmc9RkFMU0UsIG1lc3NhZ2U9RkFMU0UsIHJlc3VsdHM9J2FzaXMnfQ0KcmVwb3J0OjpjaXRlX3BhY2thZ2VzKHNlc3Npb25JbmZvKCkpDQpgYGANCg0KDQojIFJlZmVyZW5jZXMNCg==